knitr::opts_chunk$set(fig.align="center")
library(rstanarm)
library(tidyverse)
library(tidybayes)
library(modelr)
library(ggplot2)
library(magrittr)
library(emmeans)
library(bayesplot)
library(brms)
library(gganimate)
theme_set(theme_light())
In our experiment, we used a visualization recommendation algorithm (composed of one search algorithm and one oracle algorithm) to generate visualizations for the user on one of two datasets. We then asked the user to evaluate the tool on a variety of metrics (confidence in understanding data, confidence in answer, efficiency, ease of use, utility, and overall).
Given a search algorithm (bfs or dfs), an oracle (compassql or dziban), and a dataset (birdstrikes or movies), we would like to predict a user's average score for a given metric. In addition, we would like to know whether the choice of search algorithm and oracle has any meaningful impact on a user's rating for these metrics.
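Because each response is an ordinal Likert rating, we model every metric with a cumulative ("cut-point") probit model: a rating falls at or below category $k$ whenever a latent standard-normal variable lies below the $k$-th threshold,

$$P(y \le k \mid \eta) = \Phi(\tau_k - \eta), \qquad \eta = \mathbf{x}^\top \boldsymbol{\beta},$$

where $\Phi$ is the standard normal CDF, the ordered thresholds $\tau_k$ appear as Intercept[k] in the model summaries below, and $\eta$ is the linear predictor built from dataset, oracle, search, and their interactions.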
analyses = c("confidence.udata", "confidence.ans", "efficiency", "ease.of.use", "utility", "overall")
confidence_metrics = c("confidence.udata", "confidence.ans")
preference_metrics = c("efficiency", "ease.of.use", "utility", "overall")
user_response_data <- read.csv('processed_ptask_responses.csv')
# The rating columns must be ordered factors for the cumulative ordinal models
user_response_data[,analyses] <- lapply(user_response_data[,analyses], ordered)
user_response_data <- user_response_data %>%
mutate(
dataset = as.factor(dataset),
oracle = as.factor(oracle),
search = as.factor(search),
task = as.factor(task)
)
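Since the cumulative model treats category order as meaningful, it is worth confirming that the ordered factors picked up the intended level order (a quick sketch using base R):
# Inspect the category levels that the ordinal models will use
lapply(user_response_data[analyses], levels)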
models <- list()
search_differences <- list()
oracle_differences <- list()
alg_differences <- list()
# Alias passed as the `data` argument to the brm() calls below; without this
# definition, those calls would reference an undefined object
data <- user_response_data
seed = 12
filename = "confidence_udata"
models$confidence_udata <- brm(
formula = bf(confidence.udata ~ dataset * oracle * search),
family = cumulative("probit"),
prior = prior(normal(0.26, 1.26), class = Intercept),
chains = 2,
cores = 2,
iter = 2500,
warmup = 1000,
data = data,
control = list(adapt_delta = 0.99),
file = filename,
seed = seed
)
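As an optional sanity check on the normal(0.26, 1.26) threshold prior, brms can sample from the prior alone via sample_prior = "only". A hypothetical variant of the call above (not run in this analysis) would look like this; calling add_predicted_draws() on prior_check would then show the rating distributions the prior implies before seeing any data.
# Hypothetical prior-predictive version of the model above (not run)
prior_check <- brm(
  formula = bf(confidence.udata ~ dataset * oracle * search),
  family = cumulative("probit"),
  prior = prior(normal(0.26, 1.26), class = Intercept),
  sample_prior = "only",
  chains = 2,
  cores = 2,
  iter = 2500,
  warmup = 1000,
  data = data,
  seed = seed
)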
Check some diagnostics for our model: Rhat should be close to 1, and Bulk_ESS should be in the thousands.
summary(models$confidence_udata)
## Family: cumulative
## Links: mu = probit; disc = identity
## Formula: confidence.udata ~ dataset * oracle * search
## Data: data (Number of observations: 236)
## Samples: 2 chains, each with iter = 2500; warmup = 1000; thin = 1;
## total post-warmup samples = 3000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept[1] -1.80 0.24 -2.28 -1.32 1.00
## Intercept[2] -0.76 0.21 -1.18 -0.35 1.00
## Intercept[3] 0.77 0.21 0.36 1.19 1.00
## datasetmovies -0.06 0.29 -0.63 0.50 1.00
## oracledziban 0.27 0.29 -0.28 0.85 1.00
## searchdfs -0.17 0.29 -0.72 0.39 1.00
## datasetmovies:oracledziban -0.11 0.40 -0.90 0.67 1.00
## datasetmovies:searchdfs -0.06 0.40 -0.83 0.73 1.00
## oracledziban:searchdfs -0.05 0.41 -0.86 0.74 1.00
## datasetmovies:oracledziban:searchdfs 0.67 0.57 -0.47 1.78 1.00
## Bulk_ESS Tail_ESS
## Intercept[1] 1609 1693
## Intercept[2] 1481 1939
## Intercept[3] 1568 1978
## datasetmovies 1204 1519
## oracledziban 1301 1556
## searchdfs 1390 1617
## datasetmovies:oracledziban 1090 1446
## datasetmovies:searchdfs 1130 1324
## oracledziban:searchdfs 1225 1751
## datasetmovies:oracledziban:searchdfs 1117 1428
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## disc 1.00 0.00 1.00 1.00 1.00 3000 3000
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
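As a programmatic complement to reading the summary, the rhat() and neff_ratio() generics from bayesplot work on brmsfit objects; a minimal sketch that flags problematic parameters (empty output is reassuring):
# Flag parameters whose split-Rhat deviates noticeably from 1
rhats <- rhat(models$confidence_udata)
rhats[abs(rhats - 1) > 0.01]
# Flag parameters with a very low effective-sample-size ratio
neffs <- neff_ratio(models$confidence_udata)
neffs[neffs < 0.1]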
Trace plots help us check whether there is evidence of non-convergence for our model.
plot(models$confidence_udata)
In our pairs plots, we want to make sure we don’t have highly correlated parameters (high correlation between parameters means that the model has difficulty differentiating their effects).
pairs(
models$confidence_udata,
pars = c("b_Intercept[1]",
"b_Intercept[2]",
"b_Intercept[3]"),
fixed = TRUE
)
pairs(
models$confidence_udata,
pars = c("b_datasetmovies",
"b_oracledziban",
"b_searchdfs"),
fixed = TRUE
)
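As a numeric complement to the pairs plots, we can compute the posterior correlations directly (a sketch; as.data.frame() on a brmsfit returns the posterior draws as columns). Values near +/-1 would signal poorly separated effects:
# Correlations among the main-effect coefficients across posterior draws
draws <- as.data.frame(models$confidence_udata)
round(cor(draws[, c("b_datasetmovies", "b_oracledziban", "b_searchdfs")]), 2)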
We now look at the average response for confidence in understanding the data across the different combinations of search and oracle, using draws from the model’s posterior predictive distribution. The thicker, shorter line represents the 50% credible interval, while the thinner, longer line represents the 95% credible interval.
draw_data_confidence_udata <- user_response_data %>%
add_predicted_draws(models$confidence_udata,
seed = seed,
re_formula = NA) %>%
group_by(search, oracle, .draw) %>%
mutate(rating = weighted.mean(as.numeric(as.character(.prediction))))
confidence_udata_plot <- draw_data_confidence_udata %>%
ggplot(aes(x = oracle, y = rating)) +
stat_eye(.width = c(.95, .5)) +
theme_minimal() +
coord_cartesian(ylim = c(-2, 2)) +
facet_grid(. ~ search)
confidence_udata_plot
We can get the numeric values of the interval boundaries shown above with mean_qi().
fit_info_confidence_udata <- draw_data_confidence_udata %>% group_by(search, oracle) %>% mean_qi(rating, .width = c(.95, .5))
fit_info_confidence_udata
## # A tibble: 8 x 8
## # Groups: search [2]
## search oracle rating .lower .upper .width .point .interval
## <fct> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 bfs compassql 0.937 0.661 1.21 0.95 mean qi
## 2 bfs dziban 1.08 0.817 1.32 0.95 mean qi
## 3 dfs compassql 0.800 0.517 1.07 0.95 mean qi
## 4 dfs dziban 1.14 0.883 1.38 0.95 mean qi
## 5 bfs compassql 0.937 0.839 1.04 0.5 mean qi
## 6 bfs dziban 1.08 1 1.17 0.5 mean qi
## 7 dfs compassql 0.800 0.717 0.9 0.5 mean qi
## 8 dfs dziban 1.14 1.07 1.23 0.5 mean qi
Next, we want to see if there is any meaningful difference in user ratings between the two search algorithms (bfs and dfs) and the two oracles (compassql and dziban).
confidence_udata_predictive_data <- user_response_data %>% add_predicted_draws(models$confidence_udata, seed = seed, re_formula = NA)
confidence_udata_predictive_data$alg <- paste(confidence_udata_predictive_data$search, confidence_udata_predictive_data$oracle)
Differences in user score by search algorithm.
search_differences$confidence_udata <- confidence_udata_predictive_data %>%
group_by(search, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = search) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'search' (override with `.groups` argument)
search_differences$confidence_udata$metric = "confidence.udata"
search_differences$confidence_udata %>%
ggplot(aes(x = diff_in_rating, y = "confidence.udata")) +
xlab(paste0("Expected Difference in Rating (",search_differences$confidence_udata[1,'search'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
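This comparison can also be reduced to a single number: the posterior probability that the difference in mean rating is positive (a minimal sketch; the sign convention follows the contrast named on the x-axis label above).
# Posterior probability that the rating difference favors the first level
mean(search_differences$confidence_udata$diff_in_rating > 0)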
Differences in user score by oracle.
oracle_differences$confidence_udata <- confidence_udata_predictive_data %>%
group_by(oracle, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = oracle) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'oracle' (override with `.groups` argument)
oracle_differences$confidence_udata$metric = "confidence.udata"
oracle_differences$confidence_udata %>%
ggplot(aes(x = diff_in_rating, y = "confidence.udata")) +
xlab(paste0("Expected Difference in Rating (",oracle_differences$confidence_udata[1,'oracle'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by search and oracle combination (dfs compassql vs. bfs dziban only).
alg_differences$confidence_udata <- confidence_udata_predictive_data %>%
group_by(alg, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = alg) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'alg' (override with `.groups` argument)
alg_differences$confidence_udata <- subset(alg_differences$confidence_udata, alg == "dfs compassql - bfs dziban")
alg_differences$confidence_udata$metric = "confidence.udata"
alg_differences$confidence_udata %>%
ggplot(aes(x = diff_in_rating, y = "confidence.udata")) +
xlab(paste0("Expected Difference in Rating (",alg_differences$confidence_udata[1,'alg'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
filename = "confidence_ans"
models$confidence_ans <- brm(
formula = bf(confidence.ans ~ dataset * oracle * search),
family = cumulative("probit"),
prior = prior(normal(0.26, 1.26), class = Intercept),
chains = 2,
cores = 2,
iter = 2500,
warmup = 1000,
data = data,
control = list(adapt_delta = 0.99),
file = filename,
seed = seed
)
Check some diagnostics for our model: Rhat should be close to 1, and Bulk_ESS should be in the thousands.
summary(models$confidence_ans)
## Family: cumulative
## Links: mu = probit; disc = identity
## Formula: confidence.ans ~ dataset * oracle * search
## Data: data (Number of observations: 236)
## Samples: 2 chains, each with iter = 2500; warmup = 1000; thin = 1;
## total post-warmup samples = 3000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept[1] -2.56 0.36 -3.35 -1.91 1.00
## Intercept[2] -1.71 0.24 -2.20 -1.25 1.00
## Intercept[3] -0.91 0.21 -1.33 -0.51 1.00
## Intercept[4] 0.55 0.21 0.13 0.94 1.00
## datasetmovies -0.26 0.29 -0.83 0.28 1.00
## oracledziban 0.53 0.30 -0.04 1.13 1.00
## searchdfs 0.07 0.29 -0.50 0.64 1.00
## datasetmovies:oracledziban -0.32 0.40 -1.09 0.46 1.00
## datasetmovies:searchdfs 0.09 0.41 -0.71 0.90 1.00
## oracledziban:searchdfs -0.69 0.42 -1.52 0.13 1.00
## datasetmovies:oracledziban:searchdfs 1.07 0.58 -0.06 2.21 1.00
## Bulk_ESS Tail_ESS
## Intercept[1] 1538 1522
## Intercept[2] 1878 2275
## Intercept[3] 1603 2107
## Intercept[4] 1460 1910
## datasetmovies 1279 1398
## oracledziban 1364 1872
## searchdfs 1365 1917
## datasetmovies:oracledziban 1238 1867
## datasetmovies:searchdfs 1268 1786
## oracledziban:searchdfs 1284 1611
## datasetmovies:oracledziban:searchdfs 1259 1492
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## disc 1.00 0.00 1.00 1.00 1.00 3000 3000
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
Trace plots help us check whether there is evidence of non-convergence for our model.
plot(models$confidence_ans)
In our pairs plots, we want to make sure we don’t have highly correlated parameters (high correlation between parameters means that the model has difficulty differentiating their effects).
pairs(
models$confidence_ans,
pars = c("b_Intercept[1]",
"b_Intercept[2]",
"b_Intercept[3]"),
fixed = TRUE
)
pairs(
models$confidence_ans,
pars = c("b_datasetmovies",
"b_oracledziban",
"b_searchdfs"),
fixed = TRUE
)
We now look at the average response for confidence in answer across the different combinations of search and oracle, using draws from the model’s posterior predictive distribution. The thicker, shorter line represents the 50% credible interval, while the thinner, longer line represents the 95% credible interval.
draw_data_confidence_ans <- user_response_data %>%
add_predicted_draws(models$confidence_ans,
seed = seed,
re_formula = NA) %>%
group_by(search, oracle, .draw) %>%
mutate(rating = weighted.mean(as.numeric(as.character(.prediction))))
confidence_ans_plot <- draw_data_confidence_ans %>%
ggplot(aes(x = oracle, y = rating)) +
stat_eye(.width = c(.95, .5)) +
theme_minimal() +
coord_cartesian(ylim = c(-2, 2)) +
facet_grid(. ~ search)
confidence_ans_plot
We can get the numeric values of the interval boundaries shown above with mean_qi().
fit_info_confidence_ans <- draw_data_confidence_ans %>% group_by(search, oracle) %>% mean_qi(rating, .width = c(.95, .5))
fit_info_confidence_ans
## # A tibble: 8 x 8
## # Groups: search [2]
## search oracle rating .lower .upper .width .point .interval
## <fct> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 bfs compassql 0.957 0.643 1.23 0.95 mean qi
## 2 bfs dziban 1.19 0.917 1.43 0.95 mean qi
## 3 dfs compassql 1.04 0.75 1.3 0.95 mean qi
## 4 dfs dziban 1.19 0.917 1.43 0.95 mean qi
## 5 bfs compassql 0.957 0.857 1.07 0.5 mean qi
## 6 bfs dziban 1.19 1.1 1.28 0.5 mean qi
## 7 dfs compassql 1.04 0.933 1.13 0.5 mean qi
## 8 dfs dziban 1.19 1.1 1.28 0.5 mean qi
Next, we want to see if there is any meaningful difference in user ratings between the two search algorithms (bfs and dfs) and the two oracles (compassql and dziban).
confidence_ans_predictive_data <- user_response_data %>% add_predicted_draws(models$confidence_ans, seed = seed, re_formula = NA)
confidence_ans_predictive_data$alg <- paste(confidence_ans_predictive_data$search, confidence_ans_predictive_data$oracle)
Differences in user score by search algorithm.
search_differences$confidence_ans <- confidence_ans_predictive_data %>%
group_by(search, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = search) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'search' (override with `.groups` argument)
search_differences$confidence_ans$metric = "confidence.ans"
search_differences$confidence_ans %>%
ggplot(aes(x = diff_in_rating, y = "confidence.ans")) +
xlab(paste0("Expected Difference in Rating (",search_differences$confidence_ans[1,'search'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by oracle.
oracle_differences$confidence_ans <- confidence_ans_predictive_data %>%
group_by(oracle, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = oracle) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'oracle' (override with `.groups` argument)
oracle_differences$confidence_ans$metric = "confidence.ans"
oracle_differences$confidence_ans %>%
ggplot(aes(x = diff_in_rating, y = "confidence.ans")) +
xlab(paste0("Expected Difference in Rating (",oracle_differences$confidence_ans[1,'oracle'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by search and oracle combination (dfs compassql vs. bfs dziban only).
alg_differences$confidence_ans <- confidence_ans_predictive_data %>%
group_by(alg, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = alg) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'alg' (override with `.groups` argument)
alg_differences$confidence_ans <- subset(alg_differences$confidence_ans, alg == "dfs compassql - bfs dziban")
alg_differences$confidence_ans$metric = "confidence.ans"
alg_differences$confidence_ans %>%
ggplot(aes(x = diff_in_rating, y = "confidence.ans")) +
xlab(paste0("Expected Difference in Rating (",alg_differences$confidence_ans[1,'alg'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
filename = "efficiency"
models$efficiency <- brm(
formula = bf(efficiency ~ dataset * oracle * search),
family = cumulative("probit"),
prior = prior(normal(0.26, 1.26), class = Intercept),
chains = 2,
cores = 2,
iter = 2500,
warmup = 1000,
data = data,
control = list(adapt_delta = 0.99),
file = filename,
seed = seed
)
Check some diagnostics for our model: Rhat should be close to 1, and Bulk_ESS should be in the thousands.
summary(models$efficiency)
## Family: cumulative
## Links: mu = probit; disc = identity
## Formula: efficiency ~ dataset * oracle * search
## Data: data (Number of observations: 236)
## Samples: 2 chains, each with iter = 2500; warmup = 1000; thin = 1;
## total post-warmup samples = 3000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept[1] -2.03 0.24 -2.50 -1.57 1.00
## Intercept[2] -0.99 0.21 -1.40 -0.57 1.00
## Intercept[3] -0.38 0.20 -0.76 0.02 1.00
## Intercept[4] 0.55 0.20 0.16 0.95 1.00
## datasetmovies -0.28 0.27 -0.79 0.26 1.00
## oracledziban 0.13 0.28 -0.42 0.71 1.00
## searchdfs -1.07 0.29 -1.64 -0.51 1.00
## datasetmovies:oracledziban -0.10 0.38 -0.88 0.64 1.00
## datasetmovies:searchdfs 0.69 0.39 -0.08 1.43 1.00
## oracledziban:searchdfs 0.23 0.40 -0.57 1.00 1.00
## datasetmovies:oracledziban:searchdfs 0.30 0.54 -0.73 1.34 1.00
## Bulk_ESS Tail_ESS
## Intercept[1] 1554 1902
## Intercept[2] 1434 1989
## Intercept[3] 1587 1914
## Intercept[4] 1537 2166
## datasetmovies 1308 1883
## oracledziban 1086 1618
## searchdfs 1224 1852
## datasetmovies:oracledziban 1181 1849
## datasetmovies:searchdfs 1247 1787
## oracledziban:searchdfs 1042 1759
## datasetmovies:oracledziban:searchdfs 1077 1474
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## disc 1.00 0.00 1.00 1.00 1.00 3000 3000
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
Trace plots help us check whether there is evidence of non-convergence for our model.
plot(models$efficiency)
In our pairs plots, we want to make sure we don’t have highly correlated parameters (high correlation between parameters means that the model has difficulty differentiating their effects).
pairs(
models$efficiency,
pars = c("b_Intercept[1]",
"b_Intercept[2]",
"b_Intercept[3]"),
fixed = TRUE
)
pairs(
models$efficiency,
pars = c("b_datasetmovies",
"b_oracledziban",
"b_searchdfs"),
fixed = TRUE
)
We now look at the average response for efficiency across the different combinations of search and oracle, using draws from the model’s posterior predictive distribution. The thicker, shorter line represents the 50% credible interval, while the thinner, longer line represents the 95% credible interval.
draw_data_efficiency <- user_response_data %>%
add_predicted_draws(models$efficiency,
seed = seed,
re_formula = NA) %>%
group_by(search, oracle, .draw) %>%
mutate(rating = weighted.mean(as.numeric(as.character(.prediction))))
efficiency_plot <- draw_data_efficiency %>%
ggplot(aes(x = oracle, y = rating)) +
stat_eye(.width = c(.95, .5)) +
theme_minimal() +
coord_cartesian(ylim = c(-2, 2)) +
facet_grid(. ~ search)
efficiency_plot
We can get the numeric values of the interval boundaries shown above with mean_qi().
fit_info_efficiency <- draw_data_efficiency %>% group_by(search, oracle) %>% mean_qi(rating, .width = c(.95, .5))
fit_info_efficiency
## # A tibble: 8 x 8
## # Groups: search [2]
## search oracle rating .lower .upper .width .point .interval
## <fct> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 bfs compassql 0.607 0.179 1.00 0.95 mean qi
## 2 bfs dziban 0.669 0.283 1.05 0.95 mean qi
## 3 dfs compassql -0.163 -0.567 0.25 0.95 mean qi
## 4 dfs dziban 0.340 -0.05 0.733 0.95 mean qi
## 5 bfs compassql 0.607 0.464 0.75 0.5 mean qi
## 6 bfs dziban 0.669 0.55 0.8 0.5 mean qi
## 7 dfs compassql -0.163 -0.3 -0.0292 0.5 mean qi
## 8 dfs dziban 0.340 0.2 0.483 0.5 mean qi
Next, we want to see if there is any meaningful difference in user ratings between the two search algorithms (bfs and dfs) and the two oracles (compassql and dziban).
efficiency_predictive_data <- user_response_data %>% add_predicted_draws(models$efficiency, seed = seed, re_formula = NA)
efficiency_predictive_data$alg <- paste(efficiency_predictive_data$search, efficiency_predictive_data$oracle)
Differences in user score by search algorithm.
search_differences$efficiency <- efficiency_predictive_data %>%
group_by(search, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = search) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'search' (override with `.groups` argument)
search_differences$efficiency$metric = "efficiency"
search_differences$efficiency %>%
ggplot(aes(x = diff_in_rating, y = "efficiency")) +
xlab(paste0("Expected Difference in Rating (",search_differences$efficiency[1,'search'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by oracle.
oracle_differences$efficiency <- efficiency_predictive_data %>%
group_by(oracle, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = oracle) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'oracle' (override with `.groups` argument)
oracle_differences$efficiency$metric = "efficiency"
oracle_differences$efficiency %>%
ggplot(aes(x = diff_in_rating, y = "efficiency")) +
xlab(paste0("Expected Difference in Rating (",oracle_differences$efficiency[1,'oracle'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by search and oracle combination (dfs compassql vs. bfs dziban only).
alg_differences$efficiency <- efficiency_predictive_data %>%
group_by(alg, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = alg) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'alg' (override with `.groups` argument)
alg_differences$efficiency <- subset(alg_differences$efficiency, alg == "dfs compassql - bfs dziban")
alg_differences$efficiency$metric = "efficiency"
alg_differences$efficiency %>%
ggplot(aes(x = diff_in_rating, y = "efficiency")) +
xlab(paste0("Expected Difference in Rating (",alg_differences$efficiency[1,'alg'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
filename = "ease_of_use"
models$ease_of_use <- brm(
formula = bf(ease.of.use ~ dataset * oracle * search),
family = cumulative("probit"),
prior = prior(normal(0.26, 1.26), class = Intercept),
chains = 2,
cores = 2,
iter = 2500,
warmup = 1000,
data = data,
control = list(adapt_delta = 0.99),
file = filename,
seed = seed
)
Check some diagnostics for our model: Rhat should be close to 1, and Bulk_ESS should be in the thousands.
summary(models$ease_of_use)
## Family: cumulative
## Links: mu = probit; disc = identity
## Formula: ease.of.use ~ dataset * oracle * search
## Data: data (Number of observations: 236)
## Samples: 2 chains, each with iter = 2500; warmup = 1000; thin = 1;
## total post-warmup samples = 3000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept[1] -2.55 0.26 -3.06 -2.05 1.00
## Intercept[2] -1.50 0.23 -1.94 -1.07 1.00
## Intercept[3] -0.85 0.22 -1.29 -0.43 1.00
## Intercept[4] 0.56 0.22 0.13 0.99 1.00
## datasetmovies -0.28 0.29 -0.85 0.29 1.00
## oracledziban -0.33 0.30 -0.94 0.26 1.00
## searchdfs -1.27 0.29 -1.84 -0.71 1.00
## datasetmovies:oracledziban 0.37 0.40 -0.42 1.15 1.00
## datasetmovies:searchdfs 0.74 0.40 -0.02 1.53 1.00
## oracledziban:searchdfs 0.59 0.41 -0.26 1.38 1.00
## datasetmovies:oracledziban:searchdfs -0.10 0.58 -1.21 1.05 1.00
## Bulk_ESS Tail_ESS
## Intercept[1] 1427 1915
## Intercept[2] 1495 2098
## Intercept[3] 1467 1821
## Intercept[4] 1459 1817
## datasetmovies 1202 1627
## oracledziban 1206 1794
## searchdfs 1243 1729
## datasetmovies:oracledziban 1169 1670
## datasetmovies:searchdfs 1128 1670
## oracledziban:searchdfs 1108 1713
## datasetmovies:oracledziban:searchdfs 1062 1687
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## disc 1.00 0.00 1.00 1.00 1.00 3000 3000
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
Trace plots help us check whether there is evidence of non-convergence for our model.
plot(models$ease_of_use)
In our pairs plots, we want to make sure we don’t have highly correlated parameters (high correlation between parameters means that the model has difficulty differentiating their effects).
pairs(
models$ease_of_use,
pars = c("b_Intercept[1]",
"b_Intercept[2]",
"b_Intercept[3]"),
fixed = TRUE
)
pairs(
models$ease_of_use,
pars = c("b_datasetmovies",
"b_oracledziban",
"b_searchdfs"),
fixed = TRUE
)
We now look at the average response for ease of use across the different combinations of search and oracle, using draws from the model’s posterior predictive distribution. The thicker, shorter line represents the 50% credible interval, while the thinner, longer line represents the 95% credible interval.
draw_data_ease_of_use <- user_response_data %>%
add_predicted_draws(models$ease_of_use,
seed = seed,
re_formula = NA) %>%
group_by(search, oracle, .draw) %>%
mutate(rating = weighted.mean(as.numeric(as.character(.prediction))))
ease_of_use_plot <- draw_data_ease_of_use %>%
ggplot(aes(x = oracle, y = rating)) +
stat_eye(.width = c(.95, .5)) +
theme_minimal() +
coord_cartesian(ylim = c(-2, 2)) +
facet_grid(. ~ search)
ease_of_use_plot
We can get the numeric values of the interval boundaries shown above with mean_qi().
fit_info_ease_of_use <- draw_data_ease_of_use %>% group_by(search, oracle) %>% mean_qi(rating, .width = c(.95, .5))
fit_info_ease_of_use
## # A tibble: 8 x 8
## # Groups: search [2]
## search oracle rating .lower .upper .width .point .interval
## <fct> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 bfs compassql 0.900 0.554 1.21 0.95 mean qi
## 2 bfs dziban 0.787 0.450 1.1 0.95 mean qi
## 3 dfs compassql 0.0999 -0.283 0.467 0.95 mean qi
## 4 dfs dziban 0.467 0.117 0.8 0.95 mean qi
## 5 bfs compassql 0.900 0.786 1.02 0.5 mean qi
## 6 bfs dziban 0.787 0.683 0.9 0.5 mean qi
## 7 dfs compassql 0.0999 -0.0333 0.233 0.5 mean qi
## 8 dfs dziban 0.467 0.35 0.583 0.5 mean qi
Next, we want to see if there is any meaningful difference in user ratings between the two search algorithms (bfs and dfs) and the two oracles (compassql and dziban).
ease_of_use_predictive_data <- user_response_data %>% add_predicted_draws(models$ease_of_use, seed = seed, re_formula = NA)
ease_of_use_predictive_data$alg <- paste(ease_of_use_predictive_data$search, ease_of_use_predictive_data$oracle)
Differences in user score by search algorithm.
search_differences$ease_of_use <- ease_of_use_predictive_data %>%
group_by(search, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = search) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'search' (override with `.groups` argument)
search_differences$ease_of_use$metric = "ease.of.use"
search_differences$ease_of_use %>%
ggplot(aes(x = diff_in_rating, y = "ease.of.use")) +
xlab(paste0("Expected Difference in Rating (",search_differences$ease_of_use[1,'search'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by oracle.
oracle_differences$ease_of_use <- ease_of_use_predictive_data %>%
group_by(oracle, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = oracle) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'oracle' (override with `.groups` argument)
oracle_differences$ease_of_use$metric = "ease.of.use"
oracle_differences$ease_of_use %>%
ggplot(aes(x = diff_in_rating, y = "ease.of.use")) +
xlab(paste0("Expected Difference in Rating (",oracle_differences$ease_of_use[1,'oracle'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by search and oracle combination (dfs compassql vs. bfs dziban only).
alg_differences$ease_of_use <- ease_of_use_predictive_data %>%
group_by(alg, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = alg) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'alg' (override with `.groups` argument)
alg_differences$ease_of_use <- subset(alg_differences$ease_of_use, alg == "dfs compassql - bfs dziban")
alg_differences$ease_of_use$metric = "ease.of.use"
alg_differences$ease_of_use %>%
ggplot(aes(x = diff_in_rating, y = "ease.of.use")) +
xlab(paste0("Expected Difference in Rating (",alg_differences$ease_of_use[1,'alg'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
filename = "utility"
models$utility <- brm(
formula = bf(utility ~ dataset * oracle * search),
family = cumulative("probit"),
prior = prior(normal(0.26, 1.26), class = Intercept),
chains = 2,
cores = 2,
iter = 2500,
warmup = 1000,
data = data,
control = list(adapt_delta = 0.99),
file = filename,
seed = seed
)
Check some diagnostics for our model: Rhat should be close to 1, and Bulk_ESS should be in the thousands.
summary(models$utility)
## Family: cumulative
## Links: mu = probit; disc = identity
## Formula: utility ~ dataset * oracle * search
## Data: data (Number of observations: 236)
## Samples: 2 chains, each with iter = 2500; warmup = 1000; thin = 1;
## total post-warmup samples = 3000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept[1] -1.62 0.22 -2.05 -1.18 1.00
## Intercept[2] -0.71 0.20 -1.11 -0.31 1.00
## Intercept[3] -0.23 0.20 -0.62 0.17 1.00
## Intercept[4] 0.72 0.21 0.32 1.16 1.00
## datasetmovies -0.15 0.28 -0.69 0.41 1.00
## oracledziban 0.22 0.28 -0.34 0.79 1.00
## searchdfs -0.75 0.28 -1.29 -0.17 1.00
## datasetmovies:oracledziban -0.10 0.40 -0.88 0.68 1.00
## datasetmovies:searchdfs 0.58 0.39 -0.16 1.34 1.00
## oracledziban:searchdfs 0.05 0.39 -0.74 0.81 1.00
## datasetmovies:oracledziban:searchdfs 0.27 0.55 -0.81 1.35 1.00
## Bulk_ESS Tail_ESS
## Intercept[1] 1303 2133
## Intercept[2] 1571 2009
## Intercept[3] 1656 2050
## Intercept[4] 1693 1671
## datasetmovies 1145 1875
## oracledziban 1314 1762
## searchdfs 1224 1826
## datasetmovies:oracledziban 1150 1763
## datasetmovies:searchdfs 1074 1801
## oracledziban:searchdfs 1151 1836
## datasetmovies:oracledziban:searchdfs 1093 1730
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## disc 1.00 0.00 1.00 1.00 1.00 3000 3000
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
Trace plots help us check whether there is evidence of non-convergence for our model.
plot(models$utility)
In our pairs plots, we want to make sure we don’t have highly correlated parameters (high correlation between parameters means that the model has difficulty differentiating their effects).
pairs(
models$utility,
pars = c("b_Intercept[1]",
"b_Intercept[2]",
"b_Intercept[3]"),
fixed = TRUE
)
pairs(
models$utility,
pars = c("b_datasetmovies",
"b_oracledziban",
"b_searchdfs"),
fixed = TRUE
)
We now look at the average response for utility across the different combinations of search and oracle, using draws from the model’s posterior predictive distribution. The thicker, shorter line represents the 50% credible interval, while the thinner, longer line represents the 95% credible interval.
draw_data_utility <- user_response_data %>%
add_predicted_draws(models$utility,
seed = seed,
re_formula = NA) %>%
group_by(search, oracle, .draw) %>%
mutate(rating = weighted.mean(as.numeric(as.character(.prediction))))
utility_plot <- draw_data_utility %>%
ggplot(aes(x = oracle, y = rating)) +
stat_eye(.width = c(.95, .5)) +
theme_minimal() +
coord_cartesian(ylim = c(-2, 2)) +
facet_grid(. ~ search)
utility_plot
We can get the numeric values of the interval boundaries shown above with mean_qi().
fit_info_utility <- draw_data_utility %>% group_by(search, oracle) %>% mean_qi(rating, .width = c(.95, .5))
fit_info_utility
## # A tibble: 8 x 8
## # Groups: search [2]
## search oracle rating .lower .upper .width .point .interval
## <fct> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 bfs compassql 0.446 0 0.893 0.95 mean qi
## 2 bfs dziban 0.619 0.2 1.03 0.95 mean qi
## 3 dfs compassql -0.0633 -0.5 0.367 0.95 mean qi
## 4 dfs dziban 0.344 -0.0667 0.767 0.95 mean qi
## 5 bfs compassql 0.446 0.304 0.589 0.5 mean qi
## 6 bfs dziban 0.619 0.483 0.767 0.5 mean qi
## 7 dfs compassql -0.0633 -0.217 0.0833 0.5 mean qi
## 8 dfs dziban 0.344 0.2 0.5 0.5 mean qi
Next, we want to see if there is any meaningful difference in user ratings between the two search algorithms (bfs and dfs) and the two oracles (compassql and dziban).
utility_predictive_data <- user_response_data %>% add_predicted_draws(models$utility, seed = seed, re_formula = NA)
utility_predictive_data$alg <- paste(utility_predictive_data$search, utility_predictive_data$oracle)
Differences in user score by search algorithm.
search_differences$utility <- utility_predictive_data %>%
group_by(search, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = search) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'search' (override with `.groups` argument)
search_differences$utility$metric = "utility"
search_differences$utility %>%
ggplot(aes(x = diff_in_rating, y = "utility")) +
xlab(paste0("Expected Difference in Rating (",search_differences$utility[1,'search'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by oracle.
oracle_differences$utility <- utility_predictive_data %>%
group_by(oracle, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = oracle) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'oracle' (override with `.groups` argument)
oracle_differences$utility$metric = "utility"
oracle_differences$utility %>%
ggplot(aes(x = diff_in_rating, y = "utility")) +
xlab(paste0("Expected Difference in Rating (",oracle_differences$utility[1,'oracle'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by search and oracle combination (dfs compassql vs. bfs dziban only).
alg_differences$utility <- utility_predictive_data %>%
group_by(alg, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = alg) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'alg' (override with `.groups` argument)
alg_differences$utility <- subset(alg_differences$utility, alg == "dfs compassql - bfs dziban")
alg_differences$utility$metric = "utility"
alg_differences$utility %>%
ggplot(aes(x = diff_in_rating, y = "utility")) +
xlab(paste0("Expected Difference in Rating (",alg_differences$utility[1,'alg'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
filename = "overall"
models$overall <- brm(
formula = bf(overall ~ dataset * oracle * search),
family = cumulative("probit"),
prior = prior(normal(0.26, 1.26), class = Intercept),
chains = 2,
cores = 2,
iter = 2500,
warmup = 1000,
data = data,
control = list(adapt_delta = 0.99),
file = filename,
seed = seed
)
Check some diagnostics for our model: Rhat should be close to 1, and Bulk_ESS should be in the thousands.
summary(models$overall)
## Family: cumulative
## Links: mu = probit; disc = identity
## Formula: overall ~ dataset * oracle * search
## Data: data (Number of observations: 236)
## Samples: 2 chains, each with iter = 2500; warmup = 1000; thin = 1;
## total post-warmup samples = 3000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept[1] -2.21 0.25 -2.72 -1.72 1.00
## Intercept[2] -1.36 0.22 -1.78 -0.94 1.00
## Intercept[3] -0.57 0.21 -0.98 -0.15 1.00
## Intercept[4] 0.72 0.21 0.32 1.14 1.00
## datasetmovies -0.36 0.28 -0.90 0.19 1.00
## oracledziban 0.07 0.29 -0.52 0.63 1.00
## searchdfs -0.59 0.29 -1.16 -0.03 1.00
## datasetmovies:oracledziban 0.01 0.39 -0.74 0.80 1.00
## datasetmovies:searchdfs 0.32 0.40 -0.45 1.11 1.00
## oracledziban:searchdfs 0.03 0.40 -0.75 0.84 1.00
## datasetmovies:oracledziban:searchdfs 0.35 0.55 -0.72 1.41 1.00
## Bulk_ESS Tail_ESS
## Intercept[1] 1302 1661
## Intercept[2] 1324 2011
## Intercept[3] 1289 2017
## Intercept[4] 1467 2263
## datasetmovies 1172 1880
## oracledziban 1218 1632
## searchdfs 1329 1520
## datasetmovies:oracledziban 1145 1456
## datasetmovies:searchdfs 1288 1642
## oracledziban:searchdfs 1195 1351
## datasetmovies:oracledziban:searchdfs 1163 1593
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## disc 1.00 0.00 1.00 1.00 1.00 3000 3000
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
Trace plots help us check whether there is evidence of non-convergence for our model.
plot(models$overall)
In our pairs plots, we want to make sure we don’t have highly correlated parameters (high correlation between parameters means that the model has difficulty differentiating their effects).
pairs(
models$overall,
pars = c("b_Intercept[1]",
"b_Intercept[2]",
"b_Intercept[3]"),
fixed = TRUE
)
pairs(
models$overall,
pars = c("b_datasetmovies",
"b_oracledziban",
"b_searchdfs"),
fixed = TRUE
)
We now look at the average response for the overall rating across the different combinations of search and oracle, using draws from the model’s posterior predictive distribution. The thicker, shorter line represents the 50% credible interval, while the thinner, longer line represents the 95% credible interval.
draw_data_overall <- user_response_data %>%
add_predicted_draws(models$overall,
seed = seed,
re_formula = NA) %>%
group_by(search, oracle, .draw) %>%
mutate(rating = weighted.mean(as.numeric(as.character(.prediction))))
overall_plot <- draw_data_overall %>%
ggplot(aes(x = oracle, y = rating)) +
stat_eye(.width = c(.95, .5)) +
theme_minimal() +
coord_cartesian(ylim = c(-2, 2)) +
facet_grid(. ~ search)
overall_plot
We can get the numeric values of the interval boundaries shown above with mean_qi().
fit_info_overall <- draw_data_overall %>% group_by(search, oracle) %>% mean_qi(rating, .width = c(.95, .5))
fit_info_overall
## # A tibble: 8 x 8
## # Groups: search [2]
## search oracle rating .lower .upper .width .point .interval
## <fct> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 bfs compassql 0.684 0.304 1.02 0.95 mean qi
## 2 bfs dziban 0.735 0.4 1.07 0.95 mean qi
## 3 dfs compassql 0.283 -0.0833 0.65 0.95 mean qi
## 4 dfs dziban 0.554 0.2 0.9 0.95 mean qi
## 5 bfs compassql 0.684 0.571 0.804 0.5 mean qi
## 6 bfs dziban 0.735 0.617 0.85 0.5 mean qi
## 7 dfs compassql 0.283 0.15 0.417 0.5 mean qi
## 8 dfs dziban 0.554 0.433 0.683 0.5 mean qi
Next, we want to see if there is any meaningful difference in user ratings between the two search algorithms (bfs and dfs) and the two oracles (compassql and dziban).
overall_predictive_data <- user_response_data %>% add_predicted_draws(models$overall, seed = seed, re_formula = NA)
overall_predictive_data$alg <- paste(overall_predictive_data$search, overall_predictive_data$oracle)
Differences in user score by search algorithm.
search_differences$overall <- overall_predictive_data %>%
group_by(search, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = search) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'search' (override with `.groups` argument)
search_differences$overall$metric = "overall"
search_differences$overall %>%
ggplot(aes(x = diff_in_rating, y = "overall")) +
xlab(paste0("Expected Difference in Rating (",search_differences$overall[1,'search'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by oracle.
oracle_differences$overall <- overall_predictive_data %>%
group_by(oracle, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = oracle) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'oracle' (override with `.groups` argument)
oracle_differences$overall$metric = "overall"
oracle_differences$overall %>%
ggplot(aes(x = diff_in_rating, y = "overall")) +
xlab(paste0("Expected Difference in Rating (",oracle_differences$overall[1,'oracle'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Differences in user score by search and oracle combination (dfs compassql vs. bfs dziban only).
alg_differences$overall <- overall_predictive_data %>%
group_by(alg, .draw) %>%
summarize(rating = weighted.mean(as.numeric(.prediction))) %>%
compare_levels(rating, by = alg) %>%
rename(diff_in_rating = rating)
## `summarise()` regrouping output by 'alg' (override with `.groups` argument)
alg_differences$overall <- subset(alg_differences$overall, alg == "dfs compassql - bfs dziban")
alg_differences$overall$metric = "overall"
alg_differences$overall %>%
ggplot(aes(x = diff_in_rating, y = "overall")) +
xlab(paste0("Expected Difference in Rating (",alg_differences$overall[1,'alg'],")")) +
ylab("Condition")+
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
Now we put all of the plots for search algorithm and oracle differences together, split by whether the rating metric measures confidence or preference. We’ll start with differences in search algorithms.
combined_search_differences <- rbind(search_differences$confidence_udata, search_differences$confidence_ans, search_differences$efficiency, search_differences$ease_of_use, search_differences$utility, search_differences$overall)
combined_search_differences$metric <- factor(combined_search_differences$metric, levels=rev(analyses))
# flip order so that we get bfs - dfs
if(combined_search_differences[1,'search']=="dfs - bfs"){
combined_search_differences$search = 'bfs - dfs'
combined_search_differences$diff_in_rating = -1 * combined_search_differences$diff_in_rating
}
combined_search_differences_confidence <- subset(combined_search_differences, metric %in% confidence_metrics)
search_differences_plot_confidence <- combined_search_differences_confidence %>%
ggplot(aes(x = diff_in_rating, y = metric)) +
ylab("Confidence") +
xlab(paste0("Expected Difference in Rating (",combined_search_differences_confidence[1,'search'],")")) +
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
search_differences_plot_confidence
View intervals
fit_info_search_differences_confidence <- combined_search_differences_confidence %>% group_by(search, metric) %>% mean_qi(diff_in_rating, .width = c(.95, .5))
fit_info_search_differences_confidence
## # A tibble: 4 x 8
## # Groups: search [1]
## search metric diff_in_rating .lower .upper .width .point .interval
## <chr> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 bfs - dfs confidence.ans -0.0341 -0.323 0.248 0.95 mean qi
## 2 bfs - dfs confidence.ud… 0.0391 -0.228 0.294 0.95 mean qi
## 3 bfs - dfs confidence.ans -0.0341 -0.127 0.0579 0.5 mean qi
## 4 bfs - dfs confidence.ud… 0.0391 -0.0489 0.128 0.5 mean qi
combined_search_differences_preference <- subset(combined_search_differences, metric %in% preference_metrics)
search_differences_plot_preference <- combined_search_differences_preference %>%
ggplot(aes(x = diff_in_rating, y = metric)) +
ylab("Confidence") +
xlab(paste0("Expected Difference in Rating (",combined_search_differences_preference[1,'search'],")")) +
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
search_differences_plot_preference
View intervals
fit_info_search_differences_preference <- combined_search_differences_preference %>% group_by(search, metric) %>% mean_qi(diff_in_rating, .width = c(.95, .5))
fit_info_search_differences_preference
## # A tibble: 8 x 8
## # Groups: search [1]
## search metric diff_in_rating .lower .upper .width .point .interval
## <chr> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 bfs - dfs overall 0.292 -0.0661 0.636 0.95 mean qi
## 2 bfs - dfs utility 0.395 -0.0351 0.801 0.95 mean qi
## 3 bfs - dfs ease.of.use 0.558 0.193 0.895 0.95 mean qi
## 4 bfs - dfs efficiency 0.551 0.157 0.957 0.95 mean qi
## 5 bfs - dfs overall 0.292 0.169 0.414 0.5 mean qi
## 6 bfs - dfs utility 0.395 0.250 0.547 0.5 mean qi
## 7 bfs - dfs ease.of.use 0.558 0.441 0.677 0.5 mean qi
## 8 bfs - dfs efficiency 0.551 0.414 0.688 0.5 mean qi
combined_oracle_differences <- rbind(oracle_differences$confidence_udata, oracle_differences$confidence_ans, oracle_differences$efficiency, oracle_differences$ease_of_use, oracle_differences$utility, oracle_differences$overall)
combined_oracle_differences$metric <- factor(combined_oracle_differences$metric, levels=rev(analyses))
combined_oracle_differences_confidence <- subset(combined_oracle_differences, metric %in% confidence_metrics)
oracle_differences_plot_confidence <- combined_oracle_differences_confidence %>%
ggplot(aes(x = diff_in_rating, y = metric)) +
ylab("Confidence") +
xlab(paste0("Expected Difference in Rating (",combined_oracle_differences_confidence[1,'oracle'],")")) +
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
oracle_differences_plot_confidence
View intervals
fit_info_oracle_differences_confidence <- combined_oracle_differences_confidence %>% group_by(oracle, metric) %>% mean_qi(diff_in_rating, .width = c(.95, .5))
fit_info_oracle_differences_confidence
## # A tibble: 4 x 8
## # Groups: oracle [1]
## oracle metric diff_in_rating .lower .upper .width .point .interval
## <chr> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 dziban - co… confidence… 0.194 -0.0808 0.472 0.95 mean qi
## 2 dziban - co… confidence… 0.245 -0.0155 0.501 0.95 mean qi
## 3 dziban - co… confidence… 0.194 0.102 0.292 0.5 mean qi
## 4 dziban - co… confidence… 0.245 0.155 0.339 0.5 mean qi
combined_oracle_differences_preference <- subset(combined_oracle_differences, metric %in% preference_metrics)
oracle_differences_plot_preference <- combined_oracle_differences_preference %>%
ggplot(aes(x = diff_in_rating, y = metric)) +
ylab("Confidence") +
xlab(paste0("Expected Difference in Rating (",combined_oracle_differences_preference[1,'oracle'],")")) +
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
oracle_differences_plot_preference
View intervals
fit_info_oracle_differences_preference <- combined_oracle_differences_preference %>% group_by(oracle, metric) %>% mean_qi(diff_in_rating, .width = c(.95, .5))
fit_info_oracle_differences_preference
## # A tibble: 8 x 8
## # Groups: oracle [1]
## oracle metric diff_in_rating .lower .upper .width .point .interval
## <chr> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 dziban - comp… overall 0.168 -0.184 0.525 0.95 mean qi
## 2 dziban - comp… utility 0.299 -0.134 0.728 0.95 mean qi
## 3 dziban - comp… ease.of.… 0.141 -0.205 0.475 0.95 mean qi
## 4 dziban - comp… efficien… 0.296 -0.107 0.702 0.95 mean qi
## 5 dziban - comp… overall 0.168 0.0481 0.286 0.5 mean qi
## 6 dziban - comp… utility 0.299 0.150 0.446 0.5 mean qi
## 7 dziban - comp… ease.of.… 0.141 0.0230 0.262 0.5 mean qi
## 8 dziban - comp… efficien… 0.296 0.154 0.438 0.5 mean qi
combined_alg_differences <- rbind(alg_differences$confidence_udata, alg_differences$confidence_ans, alg_differences$efficiency, alg_differences$ease_of_use, alg_differences$utility, alg_differences$overall)
combined_alg_differences$metric <- factor(combined_alg_differences$metric, levels=rev(analyses))
# flip order so that we get bfs - dfs
if(combined_alg_differences[1,'alg']=="dfs - bfs"){
combined_alg_differences$alg = 'bfs - dfs'
combined_alg_differences$diff_in_rating = -1 * combined_alg_differences$diff_in_rating
}
combined_alg_differences_confidence <- subset(combined_alg_differences, metric %in% confidence_metrics)
alg_differences_plot_confidence <- combined_alg_differences_confidence %>%
ggplot(aes(x = diff_in_rating, y = metric)) +
ylab("Confidence") +
xlab(paste0("Expected Difference in Rating (",combined_alg_differences_confidence[1,'alg'],")")) +
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
alg_differences_plot_confidence
View intervals
fit_info_alg_differences_confidence <- combined_alg_differences_confidence %>% group_by(alg, metric) %>% mean_qi(diff_in_rating, .width = c(.95, .5))
fit_info_alg_differences_confidence
## # A tibble: 4 x 8
## # Groups: alg [1]
## alg metric diff_in_rating .lower .upper .width .point .interval
## <chr> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 dfs compassql… confiden… -0.157 -0.533 0.233 0.95 mean qi
## 2 dfs compassql… confiden… -0.279 -0.650 0.0838 0.95 mean qi
## 3 dfs compassql… confiden… -0.157 -0.283 -0.0333 0.5 mean qi
## 4 dfs compassql… confiden… -0.279 -0.4 -0.150 0.5 mean qi
combined_alg_differences_preference <- subset(combined_alg_differences, metric %in% preference_metrics)
alg_differences_plot_preference <- combined_alg_differences_preference %>%
ggplot(aes(x = diff_in_rating, y = metric)) +
ylab("Confidence") +
xlab(paste0("Expected Difference in Rating (",combined_alg_differences_preference[1,'alg'],")")) +
stat_halfeye(.width = c(.95, .5)) +
geom_vline(xintercept = 0, linetype = "longdash") +
theme_minimal()
alg_differences_plot_preference
View intervals
fit_info_alg_differences_preference <- combined_alg_differences_preference %>% group_by(alg, metric) %>% mean_qi(diff_in_rating, .width = c(.95, .5))
fit_info_alg_differences_preference
## # A tibble: 8 x 8
## # Groups: alg [1]
## alg metric diff_in_rating .lower .upper .width .point .interval
## <chr> <fct> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 dfs compassql … overall -0.452 -0.933 0.0500 0.95 mean qi
## 2 dfs compassql … utility -0.682 -1.28 -0.0667 0.95 mean qi
## 3 dfs compassql … ease.of… -0.687 -1.17 -0.200 0.95 mean qi
## 4 dfs compassql … efficie… -0.832 -1.38 -0.267 0.95 mean qi
## 5 dfs compassql … overall -0.452 -0.621 -0.267 0.5 mean qi
## 6 dfs compassql … utility -0.682 -0.883 -0.479 0.5 mean qi
## 7 dfs compassql … ease.of… -0.687 -0.867 -0.517 0.5 mean qi
## 8 dfs compassql … efficie… -0.832 -1.03 -0.633 0.5 mean qi